int
xen_ia64_fpswa_revision(struct xc_dom_image *dom, unsigned int *revision)
{
- int ret = -1;
+ int ret;
DECLARE_HYPERCALL;
hypercall.op = __HYPERVISOR_ia64_dom0vp_op;
hypercall.arg[0] = IA64_DOM0VP_fpswa_revision;
if (lock_pages(revision, sizeof(*revision)) != 0) {
PERROR("Could not lock memory for xen fpswa hypercall");
- goto out;
+ return -1;
}
ret = do_xen_hypercall(dom->guest_xc, &hypercall);
unlock_pages(revision, sizeof(*revision));
-out:
+
return ret;
}
mpaddr / page_size);
if (ret != NULL)
ret = (void*)((unsigned long)ret | (mpaddr & (page_size - 1)));
+
return ret;
}
}
rc = dom_fw_init(d, brkimm, bp, imva_tables_base,
(unsigned long)imva_hypercall_base, maxmem);
+
out:
if (imva_hypercall_base != NULL)
xen_ia64_dom_fw_unmap(d, imva_hypercall_base);
xen_ia64_dom_fw_unmap(d, imva);
if (bp != NULL)
xen_ia64_dom_fw_unmap(d, bp);
+
return rc;
}
mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
PROT_READ|PROT_WRITE, pfn);
if (mem == NULL) {
- ERROR("cannot map page");
- return -1;
+ ERROR("cannot map page");
+ return -1;
}
if (!read_exact(io_fd, mem, PAGE_SIZE)) {
- ERROR("Error when reading from state file (5)");
- munmap(mem, PAGE_SIZE);
- return -1;
+ ERROR("Error when reading from state file (5)");
+ munmap(mem, PAGE_SIZE);
+ return -1;
}
munmap(mem, PAGE_SIZE);
return 0;
DPRINTF("xc_linux_restore start: p2m_size = %lx\n", p2m_size);
if (!read_exact(io_fd, &ver, sizeof(unsigned long))) {
- ERROR("Error when reading version");
- goto out;
+ ERROR("Error when reading version");
+ goto out;
}
if (ver != 1) {
- ERROR("version of save doesn't match");
- goto out;
+ ERROR("version of save doesn't match");
+ goto out;
}
if (lock_pages(&ctxt, sizeof(ctxt))) {
ERROR("Error when reading batch size");
goto out;
}
- if (gmfn == INVALID_MFN)
- break;
+ if (gmfn == INVALID_MFN)
+ break;
- if (read_page(xc_handle, io_fd, dom, gmfn) < 0)
- goto out;
+ if (read_page(xc_handle, io_fd, dom, gmfn) < 0)
+ goto out;
}
DPRINTF("Received all pages\n");
goto out;
}
- DPRINTF ("Try to free %u pages\n", count);
+ DPRINTF ("Try to free %u pages\n", count);
for (i = 0; i < count; i++) {
- volatile unsigned long pfn;
+ volatile unsigned long pfn;
struct xen_memory_reservation reservation = {
.nr_extents = 1,
.domid = dom
};
set_xen_guest_handle(reservation.extent_start,
- (unsigned long *)&pfn);
+ (unsigned long *)&pfn);
- pfn = pfntab[i];
+ pfn = pfntab[i];
rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation,
&reservation);
if (rc != 1) {
}
}
- DPRINTF("Decreased reservation by %d pages\n", count);
+ DPRINTF("Decreased reservation by %u pages\n", count);
}
domctl.u.vcpucontext.vcpu = 0;
set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt);
if (xc_domctl(xc_handle, &domctl) != 0) {
- ERROR("Couldn't set vcpu context");
- goto out;
+ ERROR("Couldn't set vcpu context");
+ goto out;
}
/* Second to set registers... */
domctl.u.vcpucontext.vcpu = 0;
set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt);
if (xc_domctl(xc_handle, &domctl) != 0) {
- ERROR("Couldn't set vcpu context");
- goto out;
+ ERROR("Couldn't set vcpu context");
+ goto out;
}
/* Just a check. */
if (xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, &ctxt)) {
ERROR("Could not get vcpu context");
- goto out;
+ goto out;
}
/* Then get privreg page. */
if (read_page(xc_handle, io_fd, dom, ctxt.privregs_pfn) < 0) {
- ERROR("Could not read vcpu privregs");
- goto out;
+ ERROR("Could not read vcpu privregs");
+ goto out;
}
/* Read shared info. */
PROT_READ|PROT_WRITE, shared_info_frame);
if (shared_info == NULL) {
ERROR("cannot map page");
- goto out;
+ goto out;
}
if (!read_exact(io_fd, shared_info, PAGE_SIZE)) {
ERROR("Error when reading shared_info page");
munmap(shared_info, PAGE_SIZE);
- goto out;
+ goto out;
}
/* clear any pending events and the selector */
xc_domain_destroy(xc_handle, dom);
if (page_array != NULL)
- free(page_array);
+ free(page_array);
unlock_pages(&ctxt, sizeof(ctxt));
/*
* Generic EFI table header
*/
-typedef struct {
+typedef struct {
uint64_t signature;
uint32_t revision;
uint32_t headersize;
dom_efi_hypercall_patch(uint64_t brkimm, unsigned long paddr,
unsigned long hypercall, unsigned long imva)
{
- build_hypercall_bundle((uint64_t *)(imva + paddr - FW_HYPERCALL_BASE_PADDR),
- brkimm, hypercall, 1);
+ build_hypercall_bundle((uint64_t *)(imva + paddr -
+ FW_HYPERCALL_BASE_PADDR),
+ brkimm, hypercall, 1);
}
// builds a hypercall bundle at domain physical address
unsigned long hypercall,unsigned long ret,
unsigned long imva)
{
- build_hypercall_bundle((uint64_t *)(imva + paddr - FW_HYPERCALL_BASE_PADDR),
- brkimm, hypercall, ret);
+ build_hypercall_bundle((uint64_t *)(imva + paddr -
+ FW_HYPERCALL_BASE_PADDR),
+ brkimm, hypercall, ret);
}
static void
if (x->phys_addr < y->phys_addr)
return -1;
- // num_pages == 0 is allowed.
+ /* num_pages == 0 is allowed. */
if (x->num_pages > y->num_pages)
return 1;
if (x->num_pages < y->num_pages)
enable = 1;
else
enable = 0;
+
if (lsapic->flags.enabled && enable) {
printk("enable lsapic entry: 0x%lx\n", (u64)lsapic);
lsapic->id = lsapic_nbr;
touch_acpi_table(void)
{
lsapic_nbr = 0;
+
if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_update_lsapic, 0) < 0)
printk("Error parsing MADT - no LAPIC entries\n");
if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC,
efi_systable_init_dom0(struct fw_tables *tables)
{
int i = 1;
+
/* Write messages to the console. */
touch_acpi_table();
md->type == EFI_CONVENTIONAL_MEMORY) {
unsigned long start = md->phys_addr & PAGE_MASK;
unsigned long end = md->phys_addr +
- (md->num_pages << EFI_PAGE_SHIFT);
+ (md->num_pages << EFI_PAGE_SHIFT);
if (end == start) {
/* md->num_pages = 0 is allowed. */
if (efi_mmio(addr, PAGE_SIZE)) {
unsigned long flags;
flags = ASSIGN_writable | ASSIGN_nocache;
- assign_domain_mmio_page(d, addr, addr,
- PAGE_SIZE, flags);
+ assign_domain_mmio_page(d, addr, addr, PAGE_SIZE, flags);
}
}
setup_dom0_memmap_info(d, tables, &num_mds);
efi_systable_init_domu(struct fw_tables *tables)
{
int i = 1;
+
printk(XENLOG_GUEST XENLOG_INFO "DomainU EFI build up:");
tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
/* 3 = start info page, xenstore page and console page */
paddr_end = paddr_start + memmap_info_size + 3 * PAGE_SIZE;
memmap_info = xen_ia64_dom_fw_map(d, paddr_start);
+
if (memmap_info->efi_memmap_size == 0) {
create_memmap = 1;
} else if (memmap_info->efi_memdesc_size != sizeof(md[0]) ||
* memory map. create it for compatibility
*/
memmap_info->efi_memdesc_size = sizeof(md[0]);
- memmap_info->efi_memdesc_version =
- EFI_MEMORY_DESCRIPTOR_VERSION;
+ memmap_info->efi_memdesc_version = EFI_MEMORY_DESCRIPTOR_VERSION;
memmap_info->efi_memmap_size = 1 * sizeof(md[0]);
+
md = (efi_memory_desc_t*)&memmap_info->memdesc;
md[num_mds].type = EFI_CONVENTIONAL_MEMORY;
md[num_mds].pad = 0;
memmap_start = &memmap_info->memdesc;
memmap_end = memmap_start + memmap_info->efi_memmap_size;
+
/* XXX Currently the table must be in a single page. */
if ((unsigned long)memmap_end > (unsigned long)memmap_info + PAGE_SIZE) {
xen_ia64_dom_fw_unmap(d, memmap_info);
for (p = memmap_start; p < memmap_end; p += memmap_info->efi_memdesc_size) {
unsigned long start;
unsigned long end;
+
md = p;
start = md->phys_addr;
end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
/* exclude [paddr_start, paddr_end) */
if (paddr_end <= start || end <= paddr_start) {
- MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
- start, end);
+ MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, start, end);
} else if (paddr_start <= start && paddr_end < end) {
- MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
- paddr_end, end);
+ MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, paddr_end, end);
} else if (start < paddr_start && end <= paddr_end) {
- MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
- start, paddr_start);
+ MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, start, paddr_start);
} else {
- MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
- start, paddr_start);
- MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
- paddr_end, end);
+ MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, start, paddr_start);
+ MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, paddr_end, end);
}
}
/* memmap info page. */
- MAKE_MD(EFI_RUNTIME_SERVICES_DATA, EFI_MEMORY_WB,
- paddr_start, paddr_end);
+ MAKE_MD(EFI_RUNTIME_SERVICES_DATA, EFI_MEMORY_WB, paddr_start, paddr_end);
/* Create an entry for IO ports. */
MAKE_MD(EFI_MEMORY_MAPPED_IO_PORT_SPACE, EFI_MEMORY_UC,
- IO_PORTS_PADDR, IO_PORTS_PADDR + IO_PORTS_SIZE);
+ IO_PORTS_PADDR, IO_PORTS_PADDR + IO_PORTS_SIZE);
num_mds = i;
sort(tables->efi_memmap, num_mds, sizeof(efi_memory_desc_t),
{
if (fpswa_interface == NULL)
return -ENOSYS;
+
*revision = fpswa_interface->revision;
return 0;
}
memmap_info->efi_memdesc_version !=
EFI_MEMORY_DESCRIPTOR_VERSION)
return -EINVAL;
+
/* only 1page case is supported */
if (d->shared_info->arch.memmap_info_num_pages != 1)
return -ENOSYS;
if (d->arch.convmem_end == 0)
d->arch.convmem_end = d->max_pages << PAGE_SHIFT;
+
for (p = memmap_start; p < memmap_end; p += memmap_info->efi_memdesc_size) {
unsigned long end;
+
md = p;
end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+
if (md->attribute == EFI_MEMORY_WB &&
md->type == EFI_CONVENTIONAL_MEMORY &&
md->num_pages > 0 &&
static inline void
assign_new_domain_page_if_dom0(struct domain *d, unsigned long mpaddr)
{
- if (d == dom0)
- assign_new_domain0_page(d, mpaddr);
+ if (d == dom0)
+ assign_new_domain0_page(d, mpaddr);
}
int